#include <xeno/sched.h>
#include <xeno/event.h>
#include <asm/domain_page.h>
+#include <asm/msr.h>
extern unsigned int alloc_new_dom_mem(struct task_struct *, unsigned int);
unmap_domain_mem(list);
}
+
+/*
+ * Shared scratch state for cross-CPU MSR access: the DOM0_MSR handler
+ * stages a request here, then IPIs the other CPUs; each CPU checks the
+ * mask before acting.
+ * NOTE(review): nothing visible here locks these globals -- concurrent
+ * DOM0_MSR requests would interleave.  Confirm callers are serialised.
+ */
+static int msr_cpu_mask;       /* bitmask of target CPUs (1 << cpu) */
+static unsigned long msr_addr; /* MSR index to read or write */
+static unsigned long msr_lo;   /* low 32 bits: input for write, output for read */
+static unsigned long msr_hi;   /* high 32 bits: input for write, output for read */
+
+/*
+ * IPI callback (also invoked directly on the requesting CPU): write the
+ * staged msr_lo:msr_hi pair to msr_addr if this CPU is selected in
+ * msr_cpu_mask.
+ * NOTE(review): `1 << current->processor` is an int shift -- undefined
+ * for processor >= 32, so the mask only covers the first 32 CPUs.
+ */
+static void write_msr_for(void *unused)
+{
+ if (((1 << current->processor) & msr_cpu_mask))
+ wrmsr(msr_addr, msr_lo, msr_hi);
+}
+
+/*
+ * IPI callback (also invoked directly on the requesting CPU): read
+ * msr_addr into the shared msr_lo/msr_hi if this CPU is selected in
+ * msr_cpu_mask.
+ * NOTE(review): if the mask selects more than one CPU, every selected
+ * CPU stores into the same two globals and the surviving value is
+ * whichever CPU wrote last -- presumably reads are issued with a
+ * single-CPU mask; confirm against the caller.
+ */
+static void read_msr_for(void *unused)
+{
+ if (((1 << current->processor) & msr_cpu_mask))
+ rdmsr(msr_addr, msr_lo, msr_hi);
+}
+
long do_dom0_op(dom0_op_t *u_dom0_op)
{
}
break;
+ /*
+  * DOM0_MSR: read or write an MSR on the CPUs named by cpu_mask.
+  * The request is staged in the msr_* file statics and broadcast with
+  * smp_call_function(), which does not run the handler on the calling
+  * CPU -- hence the explicit local call afterwards.
+  * NOTE(review): the msr_* statics are unlocked; see their definition.
+  */
+ case DOM0_MSR:
+ {
+ if (op.u.msr.write)
+ {
+ msr_cpu_mask = op.u.msr.cpu_mask;
+ msr_addr = op.u.msr.msr;
+ msr_lo = op.u.msr.in1;
+ msr_hi = op.u.msr.in2;
+ smp_call_function(write_msr_for, NULL, 1, 1);
+ write_msr_for(NULL);
+ }
+ else
+ {
+ msr_cpu_mask = op.u.msr.cpu_mask;
+ msr_addr = op.u.msr.msr;
+ smp_call_function(read_msr_for, NULL, 1, 1);
+ read_msr_for(NULL);
+
+ /* Return whatever the last responding CPU stored in msr_lo/msr_hi. */
+ /* NOTE(review): copy_to_user()'s return value is ignored here --
+  * confirm whether other cases in this switch check it. */
+ op.u.msr.out1 = msr_lo;
+ op.u.msr.out2 = msr_hi;
+ copy_to_user(u_dom0_op, &op, sizeof(op));
+ }
+ ret = 0;
+ }
+ break;
+
+
default:
ret = -ENOSYS;
read_unlock_irqrestore(&tasklist_lock, flags);
}
+/*
+ * Keyhandler: dump the two P6 performance counters of the CPU that is
+ * servicing the keypress.  Output only; no state is modified.
+ */
+void cpu_counters(u_char key, void *dev_id, struct pt_regs *regs)
+{
+    unsigned int c0_lo, c0_hi, c1_lo, c1_hi;
+
+    printk("CPU performance counters for CPU %d (current):\n",
+           smp_processor_id());
+
+    rdmsr(MSR_P6_PERFCTR0, c0_lo, c0_hi);
+    rdmsr(MSR_P6_PERFCTR1, c1_lo, c1_hi);
+
+    printk("CPU%02d counter0=0x%02x:%08x counter1=0x%02x:%08x\n",
+           smp_processor_id(), c0_hi, c0_lo, c1_hi, c1_lo);
+}
+
+/*
+ * Keyhandler: zero both P6 performance counters -- note this acts only
+ * on the CPU that happens to service the keypress.
+ */
+void cpu_counters_reset(u_char key, void *dev_id, struct pt_regs *regs)
+{
+    printk("Reset CPU performance counters for CPU %d (current):\n",
+           smp_processor_id());
+
+    wrmsr(MSR_P6_PERFCTR0, 0, 0);
+    wrmsr(MSR_P6_PERFCTR1, 0, 0);
+}
extern void perfc_printall (u_char key, void *dev_id, struct pt_regs *regs);
extern void perfc_reset (u_char key, void *dev_id, struct pt_regs *regs);
add_key_handler('r', dump_runq, "dump run queues");
add_key_handler('B', kill_dom0, "reboot machine gracefully");
add_key_handler('R', halt_machine, "reboot machine ungracefully");
-
+    /* Register the performance-counter dump/reset keyhandlers. */
+    add_key_handler('c', cpu_counters, "CPU performance counters");
+    add_key_handler('C', cpu_counters_reset, "reset CPU performance counters");
return;
}
--- /dev/null
+#ifndef __ASM_MSR_H
+#define __ASM_MSR_H
+
+/*
+ * Access to machine-specific registers from dom0.  Unlike the native
+ * asm/msr.h, rdmsr/wrmsr here are implemented as DOM0_MSR hypercalls
+ * (HYPERVISOR_dom0_op) targeted at the current virtual CPU; the rd*
+ * operations still modify their val1/val2 parameters directly (without
+ * pointer indirection), matching the native macro interface.
+ */
+
+#define rdmsr(msr,val1,val2) \
+{ \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 0; \
+ op.u.msr.msr = msr; \
+ op.u.msr.cpu_mask = (1 << current->processor); \
+ HYPERVISOR_dom0_op(&op); \
+ val1 = op.u.msr.out1; \
+ val2 = op.u.msr.out2; \
+}
+
+#define wrmsr(msr,val1,val2) \
+{ \
+ dom0_op_t op; \
+ op.cmd = DOM0_MSR; \
+ op.u.msr.write = 1; \
+ op.u.msr.cpu_mask = (1 << current->processor); \
+ op.u.msr.msr = msr; \
+ op.u.msr.in1 = val1; \
+ op.u.msr.in2 = val2; \
+ HYPERVISOR_dom0_op(&op); \
+}
+
+/* Read the 64-bit timestamp counter as two 32-bit halves (EDX:EAX). */
+#define rdtsc(low,high) \
+ __asm__ __volatile__("rdtsc" : "=a" (low), "=d" (high))
+
+/* Read only the low 32 bits of the TSC; EDX is clobbered. */
+#define rdtscl(low) \
+ __asm__ __volatile__("rdtsc" : "=a" (low) : : "edx")
+
+/* Read the TSC into one 64-bit variable ("=A" binds the EDX:EAX pair). */
+#define rdtscll(val) \
+ __asm__ __volatile__("rdtsc" : "=A" (val))
+
+/* MSR 0x10 is the TSC; note this wrmsr now routes through the hypercall. */
+#define write_tsc(val1,val2) wrmsr(0x10, val1, val2)
+
+/* Read performance-monitoring counter `counter` into low:high via rdpmc. */
+#define rdpmc(counter,low,high) \
+ __asm__ __volatile__("rdpmc" \
+ : "=a" (low), "=d" (high) \
+ : "c" (counter))
+
+/* symbolic names for some interesting MSRs */
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR 0
+#define MSR_IA32_P5_MC_TYPE 1
+#define MSR_IA32_PLATFORM_ID 0x17
+#define MSR_IA32_EBL_CR_POWERON 0x2a
+
+#define MSR_IA32_APICBASE 0x1b
+#define MSR_IA32_APICBASE_BSP (1<<8)
+#define MSR_IA32_APICBASE_ENABLE (1<<11)
+#define MSR_IA32_APICBASE_BASE (0xfffff<<12)
+
+#define MSR_IA32_UCODE_WRITE 0x79
+#define MSR_IA32_UCODE_REV 0x8b
+
+#define MSR_IA32_BBL_CR_CTL 0x119
+
+#define MSR_IA32_MCG_CAP 0x179
+#define MSR_IA32_MCG_STATUS 0x17a
+#define MSR_IA32_MCG_CTL 0x17b
+
+#define MSR_IA32_THERM_CONTROL 0x19a
+#define MSR_IA32_THERM_INTERRUPT 0x19b
+#define MSR_IA32_THERM_STATUS 0x19c
+#define MSR_IA32_MISC_ENABLE 0x1a0
+
+#define MSR_IA32_DEBUGCTLMSR 0x1d9
+#define MSR_IA32_LASTBRANCHFROMIP 0x1db
+#define MSR_IA32_LASTBRANCHTOIP 0x1dc
+#define MSR_IA32_LASTINTFROMIP 0x1dd
+#define MSR_IA32_LASTINTTOIP 0x1de
+
+#define MSR_IA32_MC0_CTL 0x400
+#define MSR_IA32_MC0_STATUS 0x401
+#define MSR_IA32_MC0_ADDR 0x402
+#define MSR_IA32_MC0_MISC 0x403
+
+/* P6-family performance counter and event-select MSRs. */
+#define MSR_P6_PERFCTR0 0xc1
+#define MSR_P6_PERFCTR1 0xc2
+#define MSR_P6_EVNTSEL0 0x186
+#define MSR_P6_EVNTSEL1 0x187
+
+#define MSR_IA32_PERF_STATUS 0x198
+#define MSR_IA32_PERF_CTL 0x199
+
+/* AMD Defined MSRs */
+#define MSR_K6_EFER 0xC0000080
+#define MSR_K6_STAR 0xC0000081
+#define MSR_K6_WHCR 0xC0000082
+#define MSR_K6_UWCCR 0xC0000085
+#define MSR_K6_EPMR 0xC0000086
+#define MSR_K6_PSOR 0xC0000087
+#define MSR_K6_PFIR 0xC0000088
+
+#define MSR_K7_EVNTSEL0 0xC0010000
+#define MSR_K7_PERFCTR0 0xC0010004
+#define MSR_K7_HWCR 0xC0010015
+#define MSR_K7_CLK_CTL 0xC001001b
+#define MSR_K7_FID_VID_CTL 0xC0010041
+#define MSR_K7_VID_STATUS 0xC0010042
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1 0x107
+#define MSR_IDT_FCR2 0x108
+#define MSR_IDT_FCR3 0x109
+#define MSR_IDT_FCR4 0x10a
+
+#define MSR_IDT_MCR0 0x110
+#define MSR_IDT_MCR1 0x111
+#define MSR_IDT_MCR2 0x112
+#define MSR_IDT_MCR3 0x113
+#define MSR_IDT_MCR4 0x114
+#define MSR_IDT_MCR5 0x115
+#define MSR_IDT_MCR6 0x116
+#define MSR_IDT_MCR7 0x117
+#define MSR_IDT_MCR_CTRL 0x120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR 0x1107
+#define MSR_VIA_LONGHAUL 0x110a
+#define MSR_VIA_BCR2 0x1147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL 0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS 0x80868011
+#define MSR_TMTA_LRTI_READOUT 0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a
+
+#endif /* __ASM_MSR_H */